Load packages

library(tidyverse)
library(janitor)
library(cowplot)
library(here)
library(readxl)
library(Matrix)
library(lme4)
library(lmerTest)
library(TOSTER)
library(eyetrackingR)
library(wesanderson)
library(gghalves)
theme_set(theme_cowplot())

# Global knitr chunk options.
# NOTE: `warn` is not a knitr chunk option (the correct option is `warning`,
# which is already set), so the invalid `warn = FALSE` entry was removed.
knitr::opts_chunk$set(cache = FALSE, warning = FALSE, message = FALSE)

Load data

# Path to the processed, anonymized experiment data.
data_file_path <- here::here("data_analysis","registered_report","data","processed_data","CATegories_exp2_processed_data_anonymized.csv")

d <- read_csv(data_file_path)

## add general category properties from typicality dataset
## typicality: item-level ratings from a separate norming dataset
animal_rating_names <- read.csv(here::here("data_analysis","registered_report","data","processed_data","animal_ratings_stimuli_full.csv"))
# all image names that appeared on screen (left or right) in the experiment
animal_stims <- unique(c(unique(d$left_image),unique(d$right_image)))
typicality <- read.csv(here::here("data_analysis","registered_report","data","processed_data","typicality_animals_summarized.csv")) %>%
  mutate(item_name=str_remove(animal_name,pattern=" ")) %>% # drop spaces so names match the ratings file
  left_join(animal_rating_names) %>% # no `by`: joins on all shared columns
  # NOTE(review): ".jpg" is a regex here ("." matches any character);
  # str_replace(..., fixed(".jpg"), "") would be safer — confirm filenames
  mutate(image_name_resized = str_replace(image_experiment_name,".jpg","")) %>%
  filter(image_name_resized %in% animal_stims) # keep only stimuli actually shown
demographics <- read.csv(here::here("data_analysis","registered_report","data","processed_data","CATegories_exp2_deidentified_demographics.csv"))
d <- d %>%
  left_join(typicality %>% select(image_name_resized,category,typicality_subjective),by=c("target_image" = "image_name_resized")) %>%
  rename(typicality_condition=typicality_subjective)%>%
  filter(395<age & age<577) #keep children within the age bounds (age in days, ~13-19 months); excludes those younger than 13 months or older than 19 months

Useable trial summary

In order for a trial to be included, participants must contribute at least 50% looking during the windows of interest when computing baseline-corrected proportion target looking: the critical window (300 ms - 2800 ms relative to target word onset) and the baseline window (-2000 ms - 0 ms relative to target word onset).

# Analysis windows in ms relative to target word onset (see text above).
critical_window <- c(300,2800)
baseline_window <- c(-2000,0)

# Per-trial usability in the critical window: count frames, the share with
# non-missing looking data, and mean target looking. A trial is usable when
# at least 50% of frames in the window have data.
summarize_subj_useable_trials_critical_window <- d %>%
  filter(corrected_time_centered>=critical_window[1]&corrected_time_centered<=critical_window[2]) %>%
  group_by(sub_num,age,age_mo, child_gender, session,trial_order,trial_number,target_image,target_typicality_z,condition) %>%
  summarize(
    length_critical_window=n(), # total frames in the window
    useable_frames_critical_window=sum(!is.na(accuracy_transformed)), # frames with looking data
    percent_useable_critical_window=useable_frames_critical_window/length_critical_window,
    useable_critical_window=ifelse(percent_useable_critical_window>=0.5,1,0), #useable if at least 50% looking
    mean_target_looking_critical=mean(accuracy_transformed,na.rm=TRUE)
  )

# Per-trial usability in the baseline window; mirrors the critical-window
# summary above (same 50% criterion).
summarize_subj_useable_trials_baseline_window <- d %>%
  filter(corrected_time_centered>=baseline_window[1] & corrected_time_centered<=baseline_window[2]) %>%
  group_by(sub_num, session,age,age_mo, child_gender, trial_order,trial_number,target_image,target_typicality_z,condition) %>%
  summarize(
    length_baseline_window=n(), # total frames in the window
    useable_frames_baseline_window=sum(!is.na(accuracy_transformed)), # frames with looking data
    percent_useable_baseline_window=useable_frames_baseline_window/length_baseline_window,
    useable_baseline_window=ifelse(percent_useable_baseline_window>=0.5,1,0), #useable if at least 50% looking
    mean_target_looking_baseline=mean(accuracy_transformed,na.rm=TRUE)
  )

#overall useable trials
# Combine the two window summaries (left_join with no `by`: joins on all
# shared columns, i.e. the grouping variables above). A trial is usable
# overall only if it passes the 50% criterion in BOTH windows; the
# baseline-corrected measure is critical-window minus baseline looking.
summarize_subj_useable_trials <- summarize_subj_useable_trials_critical_window %>%
  left_join(summarize_subj_useable_trials_baseline_window) %>%
  mutate(
    useable_window = ifelse(useable_baseline_window==1&useable_critical_window==1,1,0),
    corrected_target_looking = mean_target_looking_critical - mean_target_looking_baseline
  )
  

# Per-session counts of usable trials for each participant.
summarize_useable_trials <- summarize_subj_useable_trials %>%
  group_by(sub_num, age, child_gender, session,trial_order) %>%
  summarize(
    num_useable_trials=sum(useable_window),
    num_useable_trials_critical_window = sum(useable_critical_window)
  )

#total trials
# Collapse across sessions: each participant's total usable-trial counts and
# exclusion flags (excluded when fewer than 24 usable trials — per the text,
# fewer than half of the experimental trials).
summarize_subj_trials <- summarize_useable_trials %>%
  ungroup() %>%
  group_by(sub_num) %>%
  summarize(
    session_num = n(),
    total_trials = sum(num_useable_trials),
    total_trials_critical_window = sum(num_useable_trials_critical_window),
    exclude_participant = ifelse(total_trials<24,1,0),
    exclude_participant_critical = ifelse(total_trials_critical_window<24,1,0)
  )

#average trials contributed
mean(summarize_subj_trials$total_trials)
## [1] 29.11278
#participants to exclude based on data contribution
sum(summarize_subj_trials$exclude_participant)
## [1] 48
#join with main data frame
# (left_joins with no `by` match on all shared columns)
summarize_useable_trials <- summarize_useable_trials %>%
  left_join(summarize_subj_trials)
d <- d %>%
  left_join(summarize_useable_trials) %>%
  left_join(summarize_subj_useable_trials)

# One row per participant, with usable-trial counts spread into one column
# per session (num_trials_session_*).
summarize_useable_trials_wide <- summarize_useable_trials %>%
  ungroup() %>%
  select(sub_num,session_num,total_trials,exclude_participant,session,num_useable_trials) %>%
  # NOTE(review): grouping before pivot_wider is redundant — pivot_wider
  # infers row identity from the remaining columns; confirm and simplify
  group_by(sub_num,session_num,total_trials,exclude_participant) %>%
  pivot_wider(
    names_from = "session",
    names_prefix = "num_trials_session_",
    values_from = "num_useable_trials"
  )

#write out useable trial summary
write_csv(summarize_useable_trials_wide,here::here("data_analysis","registered_report","data","processed_data","CATegories_exp2_useable_trial_summary.csv"))

Overall, among the trials contributed by the 133 participants, 82.5% of trials contained sufficient looking to meet our trial-level inclusion criteria (at least 50% looking during both the baseline window and the critical window). 85 of the 133 participants contributed sufficient looking data on at least half of the experimental trials (overall M = 29.1).

Summarize participant information

#summarize subj info
# One row per participant x session, with centered age covariates
# (age in days, age_mo in months).
subj_info_multisession <- d %>%
  distinct(sub_num, age,months,age_mo,child_gender,trial_order) %>%
  mutate(
    age_mo_c = age_mo - mean(age_mo),
    age_c = age - mean(age)
  )

# Sample size and number of female participants (one row per child).
subj_info <- d %>%
  distinct(sub_num,child_gender) %>%
  summarize(
    N = n(),
    N_female = sum(child_gender=="f")
  )

# Overall descriptives: mean/sd of age in months, min/max in days.
# NOTE(review): left_join with no `by` joins on the shared column `N`;
# this only works because both tables count the same participants —
# bind_cols would be more robust. Confirm.
overall_subj_info <- subj_info_multisession %>%
  summarize(
    N = length(unique(sub_num)),
    sessions = n(),
    mean_age = mean(age_mo),
    min_age = min(age),
    max_age = max(age),
    sd_age = sd(age_mo)
  ) %>%
  left_join(subj_info)
  
overall_subj_info %>%
  knitr::kable()
N sessions mean_age min_age max_age sd_age N_female
133 234 15.6906 410 574 1.541277 67
# subjects with usable trial data only
# Same participant summaries as above, restricted to included participants
# (exclude_participant == 0) and usable trials (useable_window == 1).

subj_info_multisession_usable_trials <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num, age,months,age_mo,child_gender,trial_order) %>%
  mutate(
    age_mo_c = age_mo - mean(age_mo),
    age_c = age - mean(age)
  )

# N and N female among included participants.
subj_info_usable_trials <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num,child_gender) %>%
  summarize(
    N = n(),
    N_female = sum(child_gender=="f")
  )

# NOTE(review): as above, the no-`by` left_join matches on the shared
# column `N`; works only while both tables agree on N. Confirm.
overall_subj_info_usable_trials <- subj_info_multisession_usable_trials %>%
  summarize(
    N = length(unique(sub_num)),
    sessions = n(),
    mean_age = mean(age_mo),
    min_age = min(age),
    max_age = max(age),
    sd_age = sd(age_mo)
  ) %>%
  left_join(subj_info_usable_trials)
  
overall_subj_info_usable_trials %>%
  knitr::kable()
N sessions mean_age min_age max_age sd_age N_female
85 166 15.72289 410 574 1.494136 52

Summarize subject-level accuracy

Overall

Here, we summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking.

# critical window only
## trial-level: mean accuracy per usable trial within the critical window.
trial_critical_window_accuracy <- d %>%
  filter(exclude_participant_critical==0) %>%
  filter(useable_critical_window==1) %>%
  # use the window constants defined above instead of repeating 300/2800,
  # so the window is defined in exactly one place
  filter(corrected_time_centered>=critical_window[1]&corrected_time_centered<=critical_window[2]) %>%
  group_by(sub_num, age,age_mo, child_gender, trial_order,trial_number,category,target_image,target_typicality_z,condition) %>%
  summarize(mean_accuracy=mean(accuracy_transformed,na.rm=TRUE))
## average: per-participant mean accuracy with 95% t-based CIs
avg_critical_window_accuracy <- trial_critical_window_accuracy %>%
  ungroup() %>%
  group_by(sub_num, child_gender) %>%
  summarize(N=n(),
            mean_age = mean(age),
            mean_age_mo = mean(age_mo),
            accuracy=mean(mean_accuracy,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(mean_accuracy,na.rm=T)/sqrt(N), # 95% CI half-width
            lower_ci=accuracy-ci,
            upper_ci=accuracy+ci)

#baseline-corrected target looking
## trial-level: one row per usable trial (the corrected values were computed
## earlier and joined onto d, so distinct() recovers the trial-level table)
trial_corrected_accuracy <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num,session, age,age_mo, child_gender, trial_order,trial_number,category,target_image,target_typicality_z, condition,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking)
## average: per-participant mean baseline-corrected looking with 95% CIs
avg_corrected_target_looking <- trial_corrected_accuracy  %>%
  group_by(sub_num, child_gender) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
            lower_ci=average_corrected_target_looking-ci,
            upper_ci=average_corrected_target_looking+ci)

By Typicality Condition

Here, we summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking.

# critical window only
# Per-participant accuracy by typicality condition, with 95% t-based CIs.
avg_critical_window_accuracy_by_typicality <- d %>%
  filter(exclude_participant_critical==0) %>%
  filter(useable_critical_window==1) %>%
  # use the shared window constants instead of repeating 300/2800
  filter(corrected_time_centered>=critical_window[1]&corrected_time_centered<=critical_window[2]) %>%
  group_by(sub_num, age,age_mo, child_gender, trial_order,trial_number,target_image,target_typicality_z,condition) %>%
  summarize(mean_accuracy=mean(accuracy_transformed,na.rm=TRUE)) %>% # trial-level means
  ungroup() %>%
  group_by(sub_num, child_gender,condition) %>%
  summarize(N=n(),
            mean_age = mean(age),
            mean_age_mo = mean(age_mo),
            accuracy=mean(mean_accuracy,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(mean_accuracy,na.rm=T)/sqrt(N), # 95% CI half-width
            lower_ci=accuracy-ci,
            upper_ci=accuracy+ci)

#baseline-corrected target looking
# Per-participant baseline-corrected looking by typicality condition.
avg_corrected_target_looking_by_typicality <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num, age,age_mo, child_gender, trial_order,trial_number,target_image,target_typicality_z,condition,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking) %>%
  group_by(sub_num, child_gender,age,age_mo,condition) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N), # 95% CI half-width
            lower_ci=average_corrected_target_looking-ci,
            upper_ci=average_corrected_target_looking+ci)

#avg baseline-corrected target looking by typicality condition AND category
avg_corrected_target_looking_category <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num, months,age_mo,child_gender, trial_order,typicality_condition,trial_number,category,target_image,target_typicality_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking) %>%
  group_by(sub_num, age_mo,child_gender, trial_order,typicality_condition,category) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N), # 95% CI half-width
            lower_ci=average_corrected_target_looking-ci,
            upper_ci=average_corrected_target_looking+ci)

#avg typicality baseline-corrected target looking
# As above, but collapsed over category; also reports SE-based bounds for plotting.
avg_corrected_target_looking_typicality <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num, months,age_mo,child_gender, trial_order,typicality_condition,trial_number,target_image,target_typicality_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking) %>%
  group_by(sub_num, age_mo,child_gender, trial_order,typicality_condition) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N), # 95% CI half-width
            se=sd(corrected_target_looking,na.rm=T)/sqrt(N), # standard error
            lower_ci=average_corrected_target_looking-ci,
            upper_ci=average_corrected_target_looking+ci,
            lower_se=average_corrected_target_looking-se,
            upper_se=average_corrected_target_looking+se)

Models

Aim 1

1.1

# Add the three condition codings used by the Aim 1 models. The branches of
# each case_when are mutually exclusive, so their order does not matter.
avg_corrected_target_looking_by_typicality <- avg_corrected_target_looking_by_typicality %>%
  mutate(
    # centered contrast: intercept = grand mean across conditions
    typicality_condition_c = case_when(
      condition == "typical" ~ 0.5,
      condition == "atypical" ~ -0.5,
      TRUE ~ NA_real_
    ),
    # typical as reference level: intercept = typical condition
    typicality_condition_typ = case_when(
      condition == "typical" ~ 0,
      condition == "atypical" ~ -1,
      TRUE ~ NA_real_
    ),
    # atypical as reference level: intercept = atypical condition
    typicality_condition_atyp = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    )
  )

# Aim 1.1: does baseline-corrected looking (a) exceed zero overall (intercept)
# and (b) differ by typicality (centered contrast)? Random intercept by subject.
m_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1|sub_num),data=avg_corrected_target_looking_by_typicality)

summary(m_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +  
##     (1 | sub_num)
##    Data: avg_corrected_target_looking_by_typicality
## 
## REML criterion at convergence: -410.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6277 -0.6652 -0.0126  0.6075  3.8645 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  sub_num  (Intercept) 0.002831 0.05321 
##  Residual             0.014054 0.11855 
## Number of obs: 331, groups:  sub_num, 85
## 
## Fixed effects:
##                         Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)            6.860e-02  8.721e-03 8.251e+01   7.867 1.24e-11 ***
## typicality_condition_c 2.386e-02  1.303e-02 2.448e+02   1.830   0.0684 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ 0.003
confint(m_1_1,method="Wald")
##                               2.5 %     97.5 %
## .sig01                           NA         NA
## .sigma                           NA         NA
## (Intercept)             0.051511065 0.08569668
## typicality_condition_c -0.001690936 0.04940374
1.1.1.

Yes, infants significantly recognized the target word.

1.1.2

No significant effect of typicality

Equivalence test — the equivalence test was non-significant, so we cannot conclude equivalence.

# Per-subject typical-minus-atypical difference, then its mean and SD for the
# one-sample equivalence (TOST) test below.
# NOTE(review): the reported df (165) implies 166 rows here, i.e. more than
# one difference per subject — confirm that indexing by condition inside
# summarize yields a single value per sub_num.
overall_condition_summary <- avg_corrected_target_looking_by_typicality %>%
  group_by(sub_num) %>%
  summarize(
    condition_diff = average_corrected_target_looking[condition=="typical"]-average_corrected_target_looking[condition=="atypical"]
  ) %>%
  ungroup() %>%
  summarize(
    N=n(),
    diff = mean(condition_diff),
    sd = sd(condition_diff)
  )

# TOST with equivalence bounds of +/-0.25 standardized mean difference (SMD)
tsum_TOST(m1=overall_condition_summary$diff,sd1=overall_condition_summary$sd,n1=overall_condition_summary$N,eqb=0.25, eqbound_type = "SMD")
## 
## One-sample t-Test
## 
## The equivalence test was non-significant, t(165) = -1.277, p = 1.02e-01
## The null hypothesis test was non-significant, t(165) = 1.944, p = 5.37e-02
## NHST: don't reject null significance hypothesis that the effect is equal to zero 
## TOST: don't reject null equivalence hypothesis
## 
## TOST Results 
##                 t  df p.value
## t-test      1.944 165   0.054
## TOST Lower  5.165 165 < 0.001
## TOST Upper -1.277 165   0.102
## 
## Effect Sizes 
##            Estimate      SE             C.I. Conf. Level
## Raw         0.02427 0.01249 [0.0036, 0.0449]         0.9
## Hedges's g  0.15016 0.07805 [0.0221, 0.2777]         0.9
## Note: SMD confidence intervals are an approximation. See vignette("SMD_calcs").
1.1.3
# Aim 1.1.3: recognition within the typical condition (typical = reference,
# so the intercept tests typical-condition looking against zero).
m_1_1_3_typ <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1|sub_num),data=avg_corrected_target_looking_by_typicality)

summary(m_1_1_3_typ)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +  
##     (1 | sub_num)
##    Data: avg_corrected_target_looking_by_typicality
## 
## REML criterion at convergence: -410.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6277 -0.6652 -0.0126  0.6075  3.8645 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  sub_num  (Intercept) 0.002831 0.05321 
##  Residual             0.014054 0.11855 
## Number of obs: 331, groups:  sub_num, 85
## 
## Fixed effects:
##                           Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)                0.08053    0.01090 182.47072   7.388 5.16e-12 ***
## typicality_condition_typ   0.02386    0.01303 244.80317   1.830   0.0684 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ 0.600
confint(m_1_1_3_typ,method="Wald")
##                                 2.5 %     97.5 %
## .sig01                             NA         NA
## .sigma                             NA         NA
## (Intercept)               0.059167816 0.10189633
## typicality_condition_typ -0.001690936 0.04940374

Infants successfully recognize words in the typical condition.

# As above with atypical as the reference level: the intercept tests
# atypical-condition looking against zero.
m_1_1_3_atyp <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1|sub_num),data=avg_corrected_target_looking_by_typicality)

summary(m_1_1_3_atyp)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +  
##     (1 | sub_num)
##    Data: avg_corrected_target_looking_by_typicality
## 
## REML criterion at convergence: -410.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6277 -0.6652 -0.0126  0.6075  3.8645 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  sub_num  (Intercept) 0.002831 0.05321 
##  Residual             0.014054 0.11855 
## Number of obs: 331, groups:  sub_num, 85
## 
## Fixed effects:
##                            Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)                 0.05668    0.01087 181.46521   5.212 5.05e-07 ***
## typicality_condition_atyp   0.02386    0.01303 244.80317   1.830   0.0684 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ -0.597
confint(m_1_1_3_atyp,method="Wald")
##                                  2.5 %     97.5 %
## .sig01                              NA         NA
## .sigma                              NA         NA
## (Intercept)                0.035363003 0.07798834
## typicality_condition_atyp -0.001690936 0.04940374

Infants successfully recognize words in the atypical condition.

1.2

# Add the same three condition codings at the trial level for the Aim 1.2
# model. Branches are mutually exclusive, so their order is irrelevant.
trial_corrected_accuracy <- trial_corrected_accuracy %>%
  mutate(
    # centered contrast: intercept = grand mean across conditions
    typicality_condition_c = case_when(
      condition == "typical" ~ 0.5,
      condition == "atypical" ~ -0.5,
      TRUE ~ NA_real_
    ),
    # typical as reference level
    typicality_condition_typ = case_when(
      condition == "typical" ~ 0,
      condition == "atypical" ~ -1,
      TRUE ~ NA_real_
    ),
    # atypical as reference level
    typicality_condition_atyp = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    )
  )

# Aim 1.2: trial-level model with by-subject random slopes for condition and
# a random intercept for stimulus category (fit reported singular; the
# subject slope correlation hits the +1 boundary).
m_1_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c + 
            (1 + typicality_condition_c|sub_num) +
            (1|category),
          data=trial_corrected_accuracy)
summary(m_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c + (1 +  
##     typicality_condition_c | sub_num) + (1 | category)
##    Data: trial_corrected_accuracy
## 
## REML criterion at convergence: 2293.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1691 -0.6304 -0.0236  0.6712  2.7664 
## 
## Random effects:
##  Groups   Name                   Variance  Std.Dev. Corr
##  sub_num  (Intercept)            0.0025672 0.05067      
##           typicality_condition_c 0.0003007 0.01734  1.00
##  category (Intercept)            0.0001679 0.01296      
##  Residual                        0.1190804 0.34508      
## Number of obs: 3138, groups:  sub_num, 85; category, 4
## 
## Fixed effects:
##                         Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)              0.07040    0.01052   5.58830   6.689 0.000725 ***
## typicality_condition_c   0.01633    0.01248 779.28972   1.308 0.191115    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ 0.079 
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')

1.3

Resampling

In order to plot participants’ average proportion looking to the target across the trial, we smooth/resample time. This is necessary when plotting the timecourses given the variable sampling rate in the data (otherwise the mean observations “jump around” due to varying contributing data composition at different time points).

# Target sampling rate: 30 frames per second (~33.3 ms per frame).
target_ms_per_frame=1000/30
#adapted from: https://github.com/langcog/peekds/blob/master/R/generate_aoi.R
# Resample one trial's looking data onto a uniform time grid using constant
# (step) interpolation. Takes a per-trial data frame with columns
# corrected_time_centered and accuracy_transformed; returns a tibble with the
# resampled series plus trial_number and sub_num.
resample_trial <- function(df_trial) {
  t_origin <- df_trial$corrected_time_centered
  data_origin <- df_trial$accuracy_transformed

  # create the new timestamps for resampling
  # snap the start down to a multiple of the frame duration, then step evenly
  t_start <- min(t_origin) - (min(t_origin) %% target_ms_per_frame)
  t_resampled <- seq(from = t_start, to = max(t_origin),
                     by = target_ms_per_frame)

  # exchange strings values with integers for resampling
  # this step critical for interpolating missing vals quickly and correctly
  # NA frames are temporarily coded as 2 so approx() can carry them forward
  aoi_num <- data_origin %>%
    dplyr::recode(.missing = 2) #recode NA as 2
    
  # start resampling with approx
  # method = "constant" + rule = 2: step interpolation, extending the
  # first/last observed value at the window edges
  aoi_resampled <- stats::approx(x = t_origin, y = aoi_num, xout = t_resampled,
                                 method = "constant", rule = 2,
                                 ties = "ordered")$y
  # map the placeholder 2 back to missing: "missing" is non-numeric, so
  # as.numeric() coerces it to NA (0 and 1 survive the round-trip)
  aoi_resampled_recoded <- aoi_resampled %>%
    dplyr::recode("0"="0","1"="1","2" = "missing") %>%
    as.numeric()

  # adding back the columns to match schema
  dplyr::tibble(corrected_time_centered = t_resampled,
                accuracy_transformed = aoi_resampled_recoded,
                trial_number = df_trial$trial_number[1],
                sub_num = df_trial$sub_num[1])
}

# Resample every trial: split d into one data frame per subject x trial,
# resample each, and recombine in subject/trial order.
d_resampled <- d %>%
  dplyr::mutate(sub_num_trial_number = paste(.data$sub_num,
                                           .data$trial_number, sep = "_")) %>%
      split(.$sub_num_trial_number) %>%
      purrr::map_df(resample_trial) %>%
      dplyr::arrange(.data$sub_num, .data$trial_number)

# Trial-level metadata to re-attach after resampling (the time and accuracy
# columns were replaced by their resampled versions above).
d_info <- d %>%
  select(-corrected_time_centered,-accuracy_transformed) %>%
  distinct(sub_num, exclude_participant, useable_window, age,age_mo, child_gender, trial_order, condition, trial_order,trial_number,target_image,target_typicality_z)

d_resampled <- d_resampled %>%
  left_join(d_info) %>% # joins on the shared columns sub_num and trial_number
  mutate(corrected_time_centered =round(corrected_time_centered,0)) # integer ms for clean binning
Timecourse analysis - cluster-based permutation analysis

Next, we prepare the data for use with the eyetrackingR package

# Reshape for eyetrackingR: logical AOI columns (target/distractor), an
# explicit trackloss flag, and trial IDs made unique across orders.
d_eyetrackingr <- d_resampled %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  unite("unique_trial",trial_order,trial_number,sep="_",remove=FALSE) %>%
  mutate(
    # TRUE when looking at target, FALSE when at distractor, NA when missing
    target = case_when(
      is.na(accuracy_transformed) ~ NA,
      accuracy_transformed == 1 ~ TRUE,
      accuracy_transformed == 0 ~ FALSE,
    ),
    # complement of target (NA stays NA)
    distractor = case_when(
      is.na(accuracy_transformed) ~ NA,
      accuracy_transformed == 0 ~ TRUE,
      accuracy_transformed == 1 ~ FALSE,
    ),
    # frames with no looking data count as trackloss
    trackloss = case_when(
      is.na(accuracy_transformed) ~ TRUE,
      TRUE ~ FALSE
    )
  ) %>%
  make_eyetrackingr_data(
    participant_column = "sub_num",
    trial_column = "unique_trial",
    time_column = "corrected_time_centered",
    trackloss_column = "trackloss",
    aoi_columns = c("target","distractor"),
    treat_non_aoi_looks_as_missing = TRUE
  )

# Restrict to the critical window (300-2800 ms), keeping the original
# time axis (rezero = FALSE).
response_window <- subset_by_window(
  d_eyetrackingr,
  window_start_time = 300,
  window_end_time = 2800,
  rezero=FALSE
)
# per-participant trackloss/data-quality summary
summary_data_loss <- describe_data(response_window, 'target', 'sub_num')

# 100-ms time bins of proportion target looking by condition, one series
# per participant
response_time <- make_time_sequence_data(response_window,
                                  time_bin_size = 100, 
                                  predictor_columns = c("condition"),
                                  aois = "target",
                                  summarize_by = "sub_num" )

# visualize timecourse
plot(response_time, predictor_column = "condition") + 
  theme_light() +
  coord_cartesian(ylim = c(0,1))

#divergence analysis (alternative bootstrapped-splines approach, retained for reference)
# tb_analysis <- analyze_time_bins(data = response_time, predictor_column = "condition", test= 'boot_splines', within_subj = TRUE, bs_samples = 1000, alpha = .05/num_time_bins)
# plot(tb_analysis) + theme_light()
# summary(tb_analysis)

#bootstrapped cluster-based permutation analysis
n_samples <- 100 # permutation samples
threshold_t <- 2 # |t| threshold for admitting a bin into a cluster
# find clusters of adjacent time bins where paired t-tests on condition
# exceed the threshold
df_timeclust <- make_time_cluster_data(response_time, 
                                      test= "t.test", paired=TRUE,
                                      predictor_column = "condition", 
                                      threshold = threshold_t) 
plot(df_timeclust) +  ylab("T-Statistic") + theme_light()

summary(df_timeclust)
## Test Type:    t.test 
## Predictor:    condition 
## Formula:  Pair(Prop[condition == "atypical"], Prop[condition == "typical"]) ~      1 
## Summary of Clusters ======
## [1] Cluster   Direction EndTime  
## <0 rows> (or 0-length row.names)
# Compare observed cluster statistics against a permutation null distribution
# (no clusters were found above threshold, so the summary is empty).
clust_analysis <- analyze_time_clusters(df_timeclust, within_subj=TRUE, paired=TRUE,
                                        samples=n_samples)
plot(clust_analysis) + theme_light()

summary(clust_analysis)
## Test Type:    t.test 
## Predictor:    condition 
## Formula:  Pair(Prop[condition == "atypical"], Prop[condition == "typical"]) ~      1 
## Null Distribution   ====== 
##  Mean:        -0.4082 
##  2.5%:        -14.0382 
## 97.5%:        13.1809 
## Summary of Clusters ======
## [1] Cluster     Direction   EndTime     Probability
## <0 rows> (or 0-length row.names)
Timecourse Plot

Next, we plot the data. First we summarize the data in two steps: (1) summarize the data by subject for each time point, followed by (2) averaging looking for each time point across subjects.

#summarizing within subject for each time point
summarize_subj <- d_resampled %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  group_by(sub_num, age,age_mo, child_gender, trial_order, corrected_time_centered) %>%
  summarize(N=n(),
            non_na_n = sum(!is.na(accuracy_transformed)), 
            mean_accuracy=mean(accuracy_transformed,na.rm=TRUE),
            # NOTE(review): when non_na_n <= 1 this CI is NA/NaN — confirm
            # such time points are handled downstream
            ci=qt(0.975, non_na_n-1)*sd(accuracy_transformed,na.rm=T)/sqrt(non_na_n),
            lower_ci=mean_accuracy-ci,
            upper_ci=mean_accuracy+ci) %>%
  ungroup()

#summarizing across subjects for each time point
summarize_across_subj <- summarize_subj %>%
  group_by(corrected_time_centered) %>%
  dplyr::summarize(n=n(),
            accuracy=mean(mean_accuracy,na.rm=TRUE),
            sd_accuracy=sd(mean_accuracy,na.rm=TRUE),
            se_accuracy=sd_accuracy/sqrt(n))

# Timecourse plot: GAM smooth over per-timepoint means with +/-1 SE bars;
# vertical lines mark word onset (0) and critical-window start (300 ms),
# dashed horizontal line marks chance (0.5).
# NOTE(review): `size` for line geoms is deprecated in ggplot2 >= 3.4 in
# favor of `linewidth` — confirm the installed version before changing.
ggplot(summarize_across_subj,aes(corrected_time_centered,accuracy))+
  xlim(-2000,4000)+
  geom_smooth(method="gam")+
  geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
  geom_point()+
  geom_vline(xintercept=0,size=1.5)+
  geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
  geom_vline(xintercept=300,linetype="dotted")+
  ylim(0.35,0.65)

ggsave(here::here("figures","overall_accuracy.png"))

Timecourse by age

# Timecourse of target looking split into two age groups, using a cut at
# 16 months; subject-level means are averaged within each group and time bin.
summarize_across_subj_by_age <- summarize_subj %>%
  mutate(
    age_group = ifelse(age_mo > 16, "older than 16 months", "younger than 16 months")
  ) %>%
  group_by(age_group, corrected_time_centered) %>%
  dplyr::summarize(
    n           = n(),
    accuracy    = mean(mean_accuracy, na.rm = TRUE),
    sd_accuracy = sd(mean_accuracy, na.rm = TRUE),
    se_accuracy = sd_accuracy / sqrt(n)
  )

# One facet per age group; reference lines mark target onset (solid),
# chance level (dashed), and the critical-window onset at 300 ms (dotted).
ggplot(summarize_across_subj_by_age, aes(x = corrected_time_centered, y = accuracy)) +
  xlim(-2000, 4000) +
  geom_smooth(method = "gam") +
  geom_errorbar(aes(ymin = accuracy - se_accuracy, ymax = accuracy + se_accuracy), width = 0) +
  geom_point() +
  geom_vline(xintercept = 0, size = 1.5) +
  geom_hline(yintercept = 0.5, size = 1.2, linetype = "dashed") +
  geom_vline(xintercept = 300, linetype = "dotted") +
  facet_wrap(~age_group)

ggsave(here::here("figures","overall_accuracy_by_age.png"), width = 12, height = 9)

Timecourse by condition

# Timecourse of target looking split by typicality condition.
# Fix: the original computed summarize_across_subj_cond twice with
# byte-identical code; the redundant second computation has been removed.

#summarizing within subject by condition for each time point
summarize_subj_condition <- d_resampled %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  group_by(sub_num, age,age_mo, child_gender, condition, trial_order, corrected_time_centered) %>%
  summarize(mean_accuracy=mean(accuracy_transformed,na.rm=TRUE))

#summarizing across subjects by condition for each time point
summarize_across_subj_cond <- summarize_subj_condition %>%
  group_by(condition,corrected_time_centered) %>%
  summarize(n=n(),
            accuracy=mean(mean_accuracy,na.rm=TRUE),
            sd_accuracy=sd(mean_accuracy,na.rm=TRUE),
            se_accuracy=sd_accuracy/sqrt(n))

# NOTE(review): the empty group_by() drops all grouping, so max(n) is the
# largest cell count over condition x time bins — i.e. rows at the densest
# time bin, not a count of distinct subjects. If a subject count is intended,
# n_distinct(sub_num) on summarize_subj_condition would give it — confirm.
num_subjects <- summarize_across_subj_cond %>%
  group_by()%>%
  summarize(max_subnum=max(n))

ggplot(summarize_across_subj_cond,aes(corrected_time_centered,accuracy,color=condition))+
  xlim(-2500,4000)+
  # shaded critical window (300-2800 ms after target word onset)
  geom_rect(data = data.frame(xmin = 300,
                              xmax = 2800,
                              ymin = -Inf,
                              ymax = Inf),
            aes(x=NULL, y=NULL,xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,color=NULL),
            fill = "grey", alpha = 0.2)+
  # shaded baseline window (-2000-0 ms)
  geom_rect(data = data.frame(xmin = -2000,
                              xmax = 0,
                              ymin = -Inf,
                              ymax = Inf),
            aes(x=NULL, y=NULL,xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,color=NULL),
            fill = "grey", alpha = 0.2)+
  geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
  geom_point(alpha=0.5)+
  # smooth over the subject-level means rather than the grand means
  geom_smooth(data=summarize_subj_condition,aes(y=mean_accuracy),method="gam")+
  geom_vline(xintercept=0,size=1.5)+
  geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
  geom_vline(xintercept=300,linetype="dotted")+
  geom_vline(xintercept=2800,linetype="dotted")+
  geom_vline(xintercept=-2000,linetype="dotted")+
  geom_vline(xintercept=0,linetype="dotted")+
  theme(legend.position = c(0.75,0.15))+
  annotate("text",label="Critical Window",x=1550,y=0.9)+
  annotate("text",label="Baseline Window",x=-1000,y=0.9)+
  ylim(0,1)+
  ylab("Proportion Target Looking")+
  xlab("Time (centered on target word onset, in ms)")

ggsave(here::here("figures","typicality_accuracy.png"),width=10,height=6)

Aim 2

Change across age

# Attach multi-session subject info to the per-trial baseline-corrected data.
# NOTE(review): left_join without an explicit `by =` relies on natural-key
# matching — confirm the shared columns are the intended join key.
trial_corrected_accuracy <- trial_corrected_accuracy %>%
  left_join(subj_info_multisession)
  

# Aim 2 model: does the typicality effect on baseline-corrected target
# looking change with age? Centered typicality and age predictors, random
# typicality slopes by subject, random intercepts by category.
m_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c * age_mo_c + 
            (1 + typicality_condition_c|sub_num) +
            (1|category),
          data=trial_corrected_accuracy)
summary(m_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c * age_mo_c +  
##     (1 + typicality_condition_c | sub_num) + (1 | category)
##    Data: trial_corrected_accuracy
## 
## REML criterion at convergence: 2300.4
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.12875 -0.63679 -0.01852  0.67408  2.79689 
## 
## Random effects:
##  Groups   Name                   Variance  Std.Dev. Corr
##  sub_num  (Intercept)            0.0020489 0.04527      
##           typicality_condition_c 0.0002154 0.01468  1.00
##  category (Intercept)            0.0001748 0.01322      
##  Residual                        0.1191112 0.34512      
## Number of obs: 3138, groups:  sub_num, 85; category, 4
## 
## Fixed effects:
##                                  Estimate Std. Error        df t value Pr(>|t|)
## (Intercept)                     6.901e-02  1.032e-02 4.993e+00   6.684  0.00114
## typicality_condition_c          1.574e-02  1.247e-02 9.188e+02   1.262  0.20727
## age_mo_c                        1.614e-02  5.317e-03 8.928e+01   3.036  0.00314
## typicality_condition_c:age_mo_c 6.903e-03  8.418e-03 9.621e+02   0.820  0.41243
##                                   
## (Intercept)                     **
## typicality_condition_c            
## age_mo_c                        **
## typicality_condition_c:age_mo_c   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) typc__ ag_m_c
## typclty_cn_  0.060              
## age_mo_c    -0.049  0.000       
## typclt__:__  0.001 -0.075  0.076
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): the fit is singular — the subject intercept/slope correlation
# is 1.00, indicating an over-parameterized random-effects structure;
# consider dropping the correlation (||) or the random slope.

Quick Visualization

# Quick visual checks of the age trend: baseline-corrected looking (first
# plot) and raw critical-window accuracy (second plot), colored by condition.
ggplot(
  data    = avg_corrected_target_looking_by_typicality,
  mapping = aes(x = age, y = average_corrected_target_looking, color = condition)
) +
  geom_point() +
  geom_smooth(method = "lm") +
  geom_hline(yintercept = 0, linetype = "dashed")

ggplot(
  data    = trial_critical_window_accuracy,
  mapping = aes(x = age, y = mean_accuracy, color = condition)
) +
  geom_point(alpha = 0.1) +
  geom_smooth()

# Alternative Aim 2 specification: model raw critical-window looking with
# baseline looking entered as a covariate, instead of subtracting baseline.
m <- lmer(mean_target_looking_critical ~ 1 + typicality_condition_c * age_mo_c + mean_target_looking_baseline +
            (1 + typicality_condition_c|sub_num) +
            (1|category),
          data=trial_corrected_accuracy)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## mean_target_looking_critical ~ 1 + typicality_condition_c * age_mo_c +  
##     mean_target_looking_baseline + (1 + typicality_condition_c |  
##     sub_num) + (1 | category)
##    Data: trial_corrected_accuracy
## 
## REML criterion at convergence: 922.8
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.44570 -0.63233  0.05242  0.68487  1.98760 
## 
## Random effects:
##  Groups   Name                   Variance  Std.Dev. Corr
##  sub_num  (Intercept)            1.760e-03 0.04196      
##           typicality_condition_c 6.906e-05 0.00831  1.00
##  category (Intercept)            9.717e-04 0.03117      
##  Residual                        7.627e-02 0.27617      
## Number of obs: 3138, groups:  sub_num, 85; category, 4
## 
## Fixed effects:
##                                  Estimate Std. Error        df t value Pr(>|t|)
## (Intercept)                     5.145e-01  2.006e-02 6.768e+00  25.646 5.39e-08
## typicality_condition_c          1.632e-02  9.942e-03 1.480e+03   1.642  0.10079
## age_mo_c                        1.517e-02  4.521e-03 8.740e+01   3.356  0.00117
## mean_target_looking_baseline    9.091e-02  2.177e-02 3.094e+03   4.176 3.05e-05
## typicality_condition_c:age_mo_c 5.864e-03  6.710e-03 1.532e+03   0.874  0.38232
##                                    
## (Intercept)                     ***
## typicality_condition_c             
## age_mo_c                        ** 
## mean_target_looking_baseline    ***
## typicality_condition_c:age_mo_c    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) typc__ ag_m_c mn_t__
## typclty_cn_  0.021                     
## age_mo_c    -0.024  0.001              
## mn_trgt_lk_ -0.532 -0.001  0.005       
## typclt__:__ -0.001 -0.075  0.058  0.004
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): same singular-fit pattern as m_2 (subject intercept/slope
# correlation = 1.00); conclusions about the age effect are consistent
# across both specifications.

Accuracy across age

Critical Window Only

# Proportion target looking in the critical window as a function of age in
# days, one point (+/- 95% CI) per subject; subjects with mean_age >= 700 are
# excluded from the plot. Fix: removed `width=0` from geom_pointrange — it is
# not a geom_pointrange parameter (ggplot2 warns "Ignoring unknown
# parameters"), so dropping it is behavior-identical minus the warning.
ggplot(filter(avg_critical_window_accuracy,mean_age<700),aes(mean_age,accuracy))+
  geom_pointrange(aes(ymin=lower_ci,ymax=upper_ci), 
                  position=position_jitter(width=0.1),
                  size=1.5) +
  geom_hline(yintercept=0.5,linetype="dashed")+  # chance level
  geom_smooth(method="lm")+
  xlab("Age (in days)")+
  ylab("Proportion Target Looking\nduring the Critical Window")+
  ylim(0,1)

ggsave(here::here("figures","age_relationship_critical_window_accuracy.png"),width=7,height=6)

Baseline-corrected proportion target looking

# NOTE(review): this age-trend plot of the baseline-corrected measure is
# deliberately left commented out — presumably superseded by the
# typicality-split version later in the report; confirm before deleting.
# ggplot(avg_corrected_target_looking,aes(age,average_corrected_target_looking))+
#   geom_pointrange(aes(ymin=lower_ci,ymax=upper_ci), 
#                   position=position_jitter(width=0.1),
#                   width=0,
#                   size=1.5) +
#   geom_hline(yintercept=0,linetype="dashed")+
#   geom_smooth(method="lm")+
#   xlab("Age (in months)")+
#   ylab("Baseline-Corrected Proportion Target Looking")+
#   ylim(-0.55,0.55)+
#   scale_x_continuous(breaks=seq(12,18,1))
# ggsave(here::here("figures","age_relationship_baseline_corrected_accuracy.png"),width=7,height=6)

Target Looking By Item

Next, we investigate item-level (target word) variation in proportion target looking.

Overall target looking during critical window and baseline window

First, we inspect overall target looking in the critical window and in the baseline window. Note the baseline effects, such that dog and cat are more likely to be fixated during baseline than bird and fish.

# Per-subject, then across-subject, mean target looking in the critical and
# baseline windows for each item (animal category), with 95% t-based CIs.
# Fix: `na.rm=T` replaced with `na.rm=TRUE` (T is a reassignable binding).
#average target looking during the baseline and critical window by item for each subject
avg_subj_target_looking_by_item <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num,  age,age_mo, child_gender, trial_order,trial_number,category,target_image,target_typicality_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking) %>%
  group_by(sub_num, age,age_mo, child_gender, trial_order,category) %>%
  summarize(N=n(),
            mean_critical_accuracy=mean(mean_target_looking_critical,na.rm=TRUE),
            # 95% CI half-width from the t distribution (N - 1 df)
            ci=qt(0.975, N-1)*sd(mean_target_looking_critical,na.rm=TRUE)/sqrt(N),
            lower_ci=mean_critical_accuracy-ci,
            upper_ci=mean_critical_accuracy+ci,
            mean_baseline_accuracy=mean(mean_target_looking_baseline,na.rm=TRUE),
            baseline_ci=qt(0.975, N-1)*sd(mean_target_looking_baseline,na.rm=TRUE)/sqrt(N),
            lower_baseline_ci=mean_baseline_accuracy-baseline_ci,
            upper_baseline_ci=mean_baseline_accuracy+baseline_ci)

#summarize average target looking across subjects
avg_target_looking_by_item <- avg_subj_target_looking_by_item %>%
  group_by(category) %>%
  summarize(N=n(),
            critical_accuracy=mean(mean_critical_accuracy,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(mean_critical_accuracy,na.rm=TRUE)/sqrt(N),
            lower_ci=critical_accuracy-ci,
            upper_ci=critical_accuracy+ci,
            baseline_accuracy=mean(mean_baseline_accuracy,na.rm=TRUE),
            baseline_ci=qt(0.975, N-1)*sd(mean_baseline_accuracy,na.rm=TRUE)/sqrt(N),
            lower_baseline_ci=baseline_accuracy-baseline_ci,
            upper_baseline_ci=baseline_accuracy+baseline_ci)
avg_target_looking_by_item %>%
  knitr::kable()
category N critical_accuracy ci lower_ci upper_ci baseline_accuracy baseline_ci lower_baseline_ci upper_baseline_ci
bird 165 0.5345443 0.0231118 0.5114325 0.5576561 0.4563859 0.0174362 0.4389497 0.4738222
cat 166 0.5974269 0.0226906 0.5747363 0.6201174 0.5169224 0.0175366 0.4993858 0.5344590
dog 165 0.5785324 0.0210893 0.5574431 0.5996218 0.5323489 0.0153733 0.5169757 0.5477222
fish 165 0.5204611 0.0246016 0.4958595 0.5450627 0.4620394 0.0168223 0.4452171 0.4788618

Target looking during the baseline and critical window split by age

# Same by-item summary as above, additionally split at 16 months of age.
# Fix: `na.rm=T` replaced with `na.rm=TRUE` (T is a reassignable binding).
#summarize average target looking across subjects, split by age group
avg_target_looking_by_item_by_age <- avg_subj_target_looking_by_item %>%
  mutate(age_group=ifelse(age_mo>16,"older than 16 months","younger than 16 months")) %>%
  group_by(age_group,category) %>%
  summarize(N=n(),
            critical_accuracy=mean(mean_critical_accuracy,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(mean_critical_accuracy,na.rm=TRUE)/sqrt(N),
            lower_ci=critical_accuracy-ci,
            upper_ci=critical_accuracy+ci,
            baseline_accuracy=mean(mean_baseline_accuracy,na.rm=TRUE),
            baseline_ci=qt(0.975, N-1)*sd(mean_baseline_accuracy,na.rm=TRUE)/sqrt(N),
            lower_baseline_ci=baseline_accuracy-baseline_ci,
            upper_baseline_ci=baseline_accuracy+baseline_ci)
avg_target_looking_by_item_by_age %>%
  knitr::kable()
age_group category N critical_accuracy ci lower_ci upper_ci baseline_accuracy baseline_ci lower_baseline_ci upper_baseline_ci
older than 16 months bird 68 0.5632700 0.0338378 0.5294322 0.5971078 0.4499680 0.0277305 0.4222375 0.4776985
older than 16 months cat 68 0.5987425 0.0323744 0.5663681 0.6311169 0.4914161 0.0253063 0.4661098 0.5167224
older than 16 months dog 68 0.5846130 0.0330569 0.5515561 0.6176700 0.5359675 0.0205925 0.5153750 0.5565601
older than 16 months fish 68 0.5720050 0.0333565 0.5386485 0.6053615 0.4666964 0.0304112 0.4362852 0.4971076
younger than 16 months bird 97 0.5144067 0.0312134 0.4831933 0.5456200 0.4608851 0.0227758 0.4381094 0.4836609
younger than 16 months cat 98 0.5965140 0.0316579 0.5648560 0.6281719 0.5346206 0.0236818 0.5109387 0.5583024
younger than 16 months dog 97 0.5742698 0.0278494 0.5464203 0.6021192 0.5298122 0.0220962 0.5077160 0.5519083
younger than 16 months fish 97 0.4843272 0.0333021 0.4510251 0.5176294 0.4587747 0.0195579 0.4392169 0.4783326

Overall baseline-corrected proportion target looking

Next, we investigate item-level variation in word recognition as measured by baseline-corrected proportion target looking (to help account for the baseline difference noted above).

# Item-level baseline-corrected target looking: per-subject means first,
# then across-subject means with 95% t-based CIs.
# Fix: `na.rm=T` replaced with `na.rm=TRUE` (T is a reassignable binding).
# NOTE(review): distinct() selects `months` here while the analogous block
# above selects `age` — confirm both columns exist and this is intentional.
#average corrected target looking by item for each subject
avg_subj_corrected_target_looking_by_item <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  mutate(age_group=ifelse(age_mo>16,"older than 16 months","younger than 16 months")) %>%
  distinct(sub_num, months,age_mo,age_group, child_gender, trial_order,trial_number,category,target_image,target_typicality_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking) %>%
  group_by(sub_num, age_mo,age_group, child_gender, trial_order,category) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=TRUE)/sqrt(N),
            lower_ci=average_corrected_target_looking-ci,
            upper_ci=average_corrected_target_looking+ci)

#summarize average corrected target looking across subject
avg_corrected_target_looking_by_item <- avg_subj_corrected_target_looking_by_item %>%
  group_by(category) %>%
  summarize(N=n(),
            corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=TRUE)/sqrt(N),
            lower_ci=corrected_target_looking-ci,
            upper_ci=corrected_target_looking+ci)
avg_corrected_target_looking_by_item %>%
  knitr::kable()
category N corrected_target_looking ci lower_ci upper_ci
bird 165 0.0781583 0.0287984 0.0493599 0.1069568
cat 166 0.0805045 0.0279757 0.0525288 0.1084802
dog 165 0.0461835 0.0255777 0.0206059 0.0717612
fish 165 0.0584217 0.0279178 0.0305038 0.0863395

Baseline-corrected proportion target looking split by age

# Item-level baseline-corrected looking split by the 16-month age grouping.
# Fix: `na.rm=T` replaced with `na.rm=TRUE` (T is a reassignable binding).
#summarize average corrected target looking across subject, by age group
avg_corrected_target_looking_by_item_by_age <- avg_subj_corrected_target_looking_by_item %>%
  group_by(age_group,category) %>%
  summarize(N=n(),
            corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=TRUE)/sqrt(N),
            lower_ci=corrected_target_looking-ci,
            upper_ci=corrected_target_looking+ci)
avg_corrected_target_looking_by_item_by_age %>%
  knitr::kable()
age_group category N corrected_target_looking ci lower_ci upper_ci
older than 16 months bird 68 0.1133020 0.0449011 0.0684009 0.1582031
older than 16 months cat 68 0.1073264 0.0421662 0.0651602 0.1494926
older than 16 months dog 68 0.0486455 0.0375555 0.0110900 0.0862010
older than 16 months fish 68 0.1053086 0.0424541 0.0628545 0.1477627
younger than 16 months bird 97 0.0535215 0.0374115 0.0161100 0.0909331
younger than 16 months cat 98 0.0618934 0.0374395 0.0244539 0.0993328
younger than 16 months dog 97 0.0444576 0.0351927 0.0092649 0.0796503
younger than 16 months fish 97 0.0255525 0.0361833 -0.0106308 0.0617358

Baseline-corrected proportion target looking split by typicality

# Item-level baseline-corrected looking split by typicality condition.
# Fix: `na.rm=T` replaced with `na.rm=TRUE` (T is a reassignable binding).
#average corrected target looking by item by typicality for each subject
avg_subj_corrected_target_looking_by_item_by_typicality <- d %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num, months,age_mo, trial_order,trial_number,typicality_condition,category,target_image,target_typicality_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking) %>%
  group_by(sub_num, age_mo, trial_order,typicality_condition,category) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE))
#summarize average corrected target looking across subject
avg_corrected_target_looking_by_item_by_typicality <- avg_subj_corrected_target_looking_by_item_by_typicality %>%
  group_by(typicality_condition,category) %>%
  summarize(N=n(),
            corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=TRUE)/sqrt(N),
            lower_ci=corrected_target_looking-ci,
            upper_ci=corrected_target_looking+ci)
avg_corrected_target_looking_by_item_by_typicality %>%
  knitr::kable()
typicality_condition category N corrected_target_looking ci lower_ci upper_ci
atypical bird 159 0.0983027 0.0397753 0.0585274 0.1380780
atypical cat 163 0.0586166 0.0413179 0.0172987 0.0999345
atypical dog 162 0.0165194 0.0378078 -0.0212884 0.0543272
atypical fish 164 0.0601608 0.0368548 0.0233060 0.0970156
typical bird 163 0.0657351 0.0363182 0.0294169 0.1020533
typical cat 163 0.1129400 0.0396868 0.0732532 0.1526269
typical dog 164 0.0818268 0.0374507 0.0443761 0.1192775
typical fish 159 0.0585323 0.0392131 0.0193191 0.0977454

Baseline-corrected proportion target looking by category

# Two publication figures: p2 = rain-cloud plot of baseline-corrected target
# looking by typicality, faceted by category; p1 = age trend by typicality.
# Fixes:
#  * ggsave() was called right after assigning p2/p1 without a `plot=`
#    argument; ggsave defaults to the last *displayed* plot, and an assigned
#    (unprinted) ggplot is never displayed, so the wrong figure was saved.
#    Now `plot = p2` / `plot = p1` are passed explicitly.
#  * plot_grid(..., size=24): `size` is not a plot_grid parameter (it falls
#    into `...`, which expects plots); the intent is the panel-label font
#    size, i.e. `label_size`.
#  * geom_path(..., fill=NA) and geom_pointrange(..., width=0) removed —
#    both are unknown parameters that only trigger ggplot2 warnings.
pal <- wes_palette("Rushmore1", n=5)
set.seed(1)
jitterer <- position_jitter(width = .05,seed=1)  # shared seed so paths and points align
p2 <- ggplot(avg_corrected_target_looking_category,aes(x=typicality_condition,y=average_corrected_target_looking, fill=typicality_condition))+
  geom_half_violin(data=filter(avg_corrected_target_looking_category, typicality_condition=="atypical"),position = position_nudge(x = -.1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="l")+
  geom_half_violin(data=filter(avg_corrected_target_looking_category, typicality_condition=="typical"),position = position_nudge(x = .1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="r")+
  # faint per-subject lines connecting the two conditions
  geom_path(aes(group=sub_num),color="black",alpha=0.05,size=0.75,position=jitterer)+
  geom_point(aes(color=typicality_condition,group=sub_num), size = 2.5, alpha=0.05,position=jitterer)+
  # condition-level means with 95% CIs, overlaid in black
  geom_point(data=avg_corrected_target_looking_by_item_by_typicality,aes(y=corrected_target_looking),color="black",size=1.8)+
  geom_line(data=avg_corrected_target_looking_by_item_by_typicality,aes(y=corrected_target_looking,group=1),color="black",size=1.5)+
  geom_errorbar(data=avg_corrected_target_looking_by_item_by_typicality,aes(y=corrected_target_looking,ymin=lower_ci,ymax=upper_ci),width=0,color="black")+
  #geom_boxplot(outlier.shape = NA, alpha = .5, width = .1, colour = "black")+
  #scale_colour_brewer(palette = "Dark2")+
  #scale_fill_brewer(palette = "Dark2")+
  geom_hline(yintercept=0,linetype="dashed")+
  scale_colour_manual(values=pal[c(3,4)])+
  scale_fill_manual(values=pal[c(3,4)])+
  facet_wrap(.~category)+
  theme(legend.position="none")+
  xlab("Typicality Condition")+
  ylab("Baseline-Corrected\nProportion Target Looking")+
  theme(axis.title.x = element_text(face="bold", size=20),
        axis.text.x  = element_text(size=16),
        axis.title.y = element_text(face="bold", size=20),
        axis.text.y  = element_text(size=16),
        strip.text.x = element_text(size = 16,face="bold"))


ggsave(here::here("figures","age_relationship_baseline_corrected_accuracy_category.png"),plot=p2,width=7,height=6)
pal <- wes_palette("Rushmore1", n=5)
p1 <- ggplot(avg_corrected_target_looking_typicality,aes(age_mo,average_corrected_target_looking,color=typicality_condition,group=typicality_condition))+
  geom_pointrange(aes(ymin=lower_se,ymax=upper_se), 
                  position=position_jitter(width=0.2),
                  size=1) +
  geom_hline(yintercept=0,linetype="dashed")+
  geom_smooth(method="lm",color="black",size=1.3)+
  xlab("Age (in months)")+
  ylab("Baseline-Corrected\nProportion Target Looking")+
  #ylim(-0.55,0.5)+
  scale_colour_manual(values=pal[c(3,4)])+
  scale_x_continuous(breaks=seq(12,18,1))+
  facet_wrap(~typicality_condition)+
  theme(
    strip.background = element_rect(size=1, colour = "black"),
    strip.text = element_text(size=16,face="bold"),
    axis.title=element_text(size=20,face="bold"),
    axis.text = element_text(size=14))+
  theme(legend.position="none")
ggsave(here::here("figures","age_relationship_baseline_corrected_accuracy_typicality.png"),plot=p1,width=9,height=6)

plot_grid(p1,p2,nrow=1,ncol=2,labels=c("A","B"),label_size=24)

ggsave(here::here("figures","age_category_typicality.png"),width=12,height=6)

Aim 3

Here, we test whether individual differences in word recognition, and in the typicality effect, are predicted by differences in children's experience with each exemplar (as reported by parents).

3.1 z-scored by participant

#zscore parent report of typicality within participants
# NOTE(review): mean()/sd() lack na.rm = TRUE, so a participant with any
# missing rating gets NA z-scores for ALL of their trials (the group mean is
# NA); those rows are then dropped by na.omit() below. Confirm this
# complete-data-only behavior is intended. A participant who gives the same
# rating to every item would get sd = 0 and NaN/Inf z-scores — also dropped.
parent_typicality_z <- d %>%
  group_by(sub_num) %>%
  mutate(target_parent_typicality_z = ((target_parent_typicality_rating - mean(target_parent_typicality_rating))/sd(target_parent_typicality_rating)),
         distractor_parent_typicality_z = ((distractor_parent_typicality_rating - mean(distractor_parent_typicality_rating))/sd(distractor_parent_typicality_rating)))
  
# Per-subject, per-target-image average corrected looking, restricted to
# rows with complete parent-rating data.
parent_typicality <- parent_typicality_z %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num,months,age,child_gender, trial_order,trial_number,category,target_image,target_typicality_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking,target_parent_typicality_z,distractor_parent_typicality_z,target_image,target_parent_typicality_rating,distractor_parent_typicality_rating) %>%
  mutate(age_c = age - mean(age,na.rm = T))%>%
  group_by(sub_num,target_image, age_c, category,target_parent_typicality_z,target_parent_typicality_rating,distractor_parent_typicality_z) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE)) %>%
  na.omit(target_parent_typicality_rating)
# NOTE(review): na.omit() on a data frame ignores its second argument — it
# drops rows with NA in ANY column, not just target_parent_typicality_rating.

#subject details for aim 3 analysis (how many participants have survey data)
aim3_subject_info <- parent_typicality %>%
  ungroup()%>%
  summarize(
    N = length(unique(sub_num)),
    mean_age = mean(age_c),  # ~0 by construction (age was centered above)
    sd_age = sd(age_c)
  )

aim3_subject_info%>%
  knitr::kable()
N mean_age sd_age
72 0 4.177312
#model
# Aim 3.1: does within-participant z-scored parent-reported experience with
# the target exemplar predict baseline-corrected looking? `||` requests an
# uncorrelated random intercept and slope for subjects.
m_3_1 <- lmer(average_corrected_target_looking ~ target_parent_typicality_z+age_c+ (target_parent_typicality_z||sub_num) + (1|category), parent_typicality)

summary(m_3_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ target_parent_typicality_z +  
##     age_c + (target_parent_typicality_z || sub_num) + (1 | category)
##    Data: parent_typicality
## 
## REML criterion at convergence: 1969.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1759 -0.6307 -0.0280  0.6758  2.7541 
## 
## Random effects:
##  Groups    Name                       Variance  Std.Dev.
##  sub_num   (Intercept)                0.0023770 0.04875 
##  sub_num.1 target_parent_typicality_z 0.0000000 0.00000 
##  category  (Intercept)                0.0002628 0.01621 
##  Residual                             0.1186686 0.34448 
## Number of obs: 2694, groups:  sub_num, 72; category, 4
## 
## Fixed effects:
##                              Estimate Std. Error         df t value Pr(>|t|)   
## (Intercept)                 7.187e-02  1.197e-02  4.995e+00   6.002  0.00185 **
## target_parent_typicality_z  7.773e-03  6.706e-03  2.187e+03   1.159  0.24654   
## age_c                      -6.834e-04  1.589e-03  2.618e+03  -0.430  0.66723   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) trg___
## trgt_prnt__  0.000       
## age_c        0.000 -0.007
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): singular fit — the random slope variance is estimated at
# exactly 0; a model without that slope would be equivalent here.
confint(m_3_1,method="Wald")
##                                   2.5 %      97.5 %
## .sig01                               NA          NA
## .sig02                               NA          NA
## .sig03                               NA          NA
## .sigma                               NA          NA
## (Intercept)                 0.048404721 0.095343106
## target_parent_typicality_z -0.005370539 0.020916283
## age_c                      -0.003798151 0.002431438
# The NA rows are expected: Wald intervals are not computed for
# variance/sd parameters (.sig*, .sigma), only for fixed effects.

Z-scored by category by participant experience ratings

#zscore parent report of typicality within participants by category
# NOTE(review): as in the previous chunk, mean()/sd() lack na.rm = TRUE, so
# any missing rating within a sub_num x category cell yields NA z-scores for
# that whole cell; those rows are dropped by na.omit() below — confirm.
parent_typicality_category_z <- d %>%
  group_by(sub_num,category) %>%
  mutate(target_parent_typicality_category_z = ((target_parent_typicality_rating - mean(target_parent_typicality_rating))/sd(target_parent_typicality_rating)),
         distractor_parent_typicality_category_z = ((distractor_parent_typicality_rating - mean(distractor_parent_typicality_rating))/sd(distractor_parent_typicality_rating)))
  
# Per-subject, per-target-image averages with complete category-level ratings.
# NOTE(review): na.omit()'s second argument is ignored for data frames; rows
# with NA in ANY column are dropped.
parent_typicality_category <- parent_typicality_category_z %>%
  filter(exclude_participant==0) %>%
  filter(useable_window==1) %>%
  distinct(sub_num,months,age,child_gender, trial_order,trial_number,category,target_image,target_parent_typicality_category_z,mean_target_looking_critical,mean_target_looking_baseline,corrected_target_looking,distractor_parent_typicality_category_z,target_image) %>%
  mutate(age_c = age - mean(age,na.rm = T))%>%
  group_by(sub_num,target_image, category,target_parent_typicality_category_z, age_c) %>%
  summarize(N=n(),
            average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE)) %>%
  na.omit(target_parent_typicality_rating)

#model
# Same Aim 3 model as m_3_1, but with experience ratings z-scored within
# each sub_num x category cell rather than within participant overall.
m_3_1_2 <- lmer(average_corrected_target_looking ~ target_parent_typicality_category_z+age_c+ (target_parent_typicality_category_z||sub_num) + (1|category), parent_typicality_category)

summary(m_3_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## average_corrected_target_looking ~ target_parent_typicality_category_z +  
##     age_c + (target_parent_typicality_category_z || sub_num) +  
##     (1 | category)
##    Data: parent_typicality_category
## 
## REML criterion at convergence: 1987.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1600 -0.6210 -0.0277  0.6770  2.7540 
## 
## Random effects:
##  Groups    Name                                Variance  Std.Dev.
##  sub_num   (Intercept)                         0.0023069 0.04803 
##  sub_num.1 target_parent_typicality_category_z 0.0000000 0.00000 
##  category  (Intercept)                         0.0003026 0.01739 
##  Residual                                      0.1191765 0.34522 
## Number of obs: 2703, groups:  sub_num, 73; category, 4
## 
## Fixed effects:
##                                       Estimate Std. Error         df t value
## (Intercept)                          7.183e-02  1.233e-02  4.715e+00   5.825
## target_parent_typicality_category_z  2.694e-03  6.643e-03  2.637e+03   0.406
## age_c                               -4.698e-04  1.603e-03  2.626e+03  -0.293
##                                     Pr(>|t|)   
## (Intercept)                          0.00256 **
## target_parent_typicality_category_z  0.68510   
## age_c                                0.76940   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) tr____
## trgt_prn___ -0.002       
## age_c        0.000 -0.004
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): singular fit again — the random slope variance is 0.
confint(m_3_1_2,method="Wald")
##                                            2.5 %      97.5 %
## .sig01                                        NA          NA
## .sig02                                        NA          NA
## .sig03                                        NA          NA
## .sigma                                        NA          NA
## (Intercept)                          0.047664623 0.096003602
## target_parent_typicality_category_z -0.010326459 0.015714879
## age_c                               -0.003610816 0.002671119
# NA rows are expected: Wald intervals cover fixed effects only.

Model accounting for distractor rating too

The outcome variable, average_corrected_target_looking, already accounts for baseline preferences for the different images. Because we have typicality information for both the target and the distractor, we can also ask: does accounting for the salience of, or experience with, the distractor predict accuracy?

# Model 3.3: does parent-rated typicality/experience with the DISTRACTOR
# (in addition to the target) predict baseline-corrected target looking?
# Uncorrelated by-subject random slope for target typicality (||) plus a
# by-category random intercept; age is a centered covariate.
m_3_3 <- lmer(average_corrected_target_looking ~ target_parent_typicality_z+distractor_parent_typicality_z+age_c+ (target_parent_typicality_z||sub_num) + (1|category), parent_typicality)

summary(m_3_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ target_parent_typicality_z +  
##     distractor_parent_typicality_z + age_c + (target_parent_typicality_z ||  
##     sub_num) + (1 | category)
##    Data: parent_typicality
## 
## REML criterion at convergence: 1976.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1867 -0.6340 -0.0237  0.6720  2.7570 
## 
## Random effects:
##  Groups    Name                       Variance Std.Dev.
##  sub_num   (Intercept)                0.002381 0.04879 
##  sub_num.1 target_parent_typicality_z 0.000000 0.00000 
##  category  (Intercept)                0.000254 0.01594 
##  Residual                             0.118675 0.34449 
## Number of obs: 2694, groups:  sub_num, 72; category, 4
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value
## (Intercept)                     7.187e-02  1.188e-02  5.035e+00   6.048
## target_parent_typicality_z      4.513e-03  7.550e-03  1.645e+03   0.598
## distractor_parent_typicality_z  6.985e-03  7.434e-03  2.297e+03   0.940
## age_c                          -7.013e-04  1.589e-03  2.617e+03  -0.441
##                                Pr(>|t|)   
## (Intercept)                     0.00174 **
## target_parent_typicality_z      0.55014   
## distractor_parent_typicality_z  0.34751   
## age_c                           0.65907   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) trg___ dst___
## trgt_prnt__  0.000              
## dstrctr_p__  0.000 -0.460       
## age_c        0.000  0.000 -0.012
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Wald 95% CIs for the fixed effects (variance components come back NA
# under method="Wald").
confint(m_3_3,method="Wald")
##                                       2.5 %      97.5 %
## .sig01                                   NA          NA
## .sig02                                   NA          NA
## .sig03                                   NA          NA
## .sigma                                   NA          NA
## (Intercept)                     0.048582656 0.095166080
## target_parent_typicality_z     -0.010285710 0.019311093
## distractor_parent_typicality_z -0.007585384 0.021556141
## age_c                          -0.003816418 0.002413795

No, accounting for experience with the distractor does not explain infants’ accuracy.

Quick Visualization

# By-subject scatter of parent-rated target typicality (z-scored) against
# baseline-corrected target looking, with a linear trend line.
ggplot(
  data = parent_typicality,
  mapping = aes(x = target_parent_typicality_z, y = average_corrected_target_looking)
) +
  geom_point(alpha = 0.1) +
  geom_smooth(method = "lm")

# Category-level version of the same relationship, one panel per animal
# category.
ggplot(
  data = parent_typicality_category,
  mapping = aes(x = target_parent_typicality_category_z, y = average_corrected_target_looking)
) +
  geom_point(alpha = 0.1) +
  geom_smooth(method = "lm") +
  facet_wrap(~category)

# Distribution of the z-scored parent typicality ratings for target items.
hist(parent_typicality$target_parent_typicality_z)

Parent’s ratings of their infant’s experience with each exemplar does not predict accuracy.

Aim 4

We will conduct a series of analyses to determine whether our results hold across a variety of different analytic decisions.

4.1. Window

4.2 excluding unknown words

We registered using CDI responses as a way to remove unknown words; however, we did not administer the CDI. Thus, we are not including this analysis, since it would require an arbitrary accuracy cutoff for word recognition as a proxy for understanding a word (e.g., what meaningfully separates 50% accuracy from 50.01% accuracy?).

Calculate CIs per word & per child

Remove any words where CIs overlap with the child's CI around chance.

Model

Is there a typicality effect when we remove unknown words?

4.3 Reaction time as the dependent measure

We will fit models analogous to those in 1.1 and 1.2 using reaction time as our primary dependent measure rather than accuracy.

Load RT (computed using compute_RT.R & rt_helper.R)

# Trial-level reaction times precomputed by compute_RT.R / rt_helper.R.
rt_path <- here::here("data_analysis","registered_report","data","processed_data","CATegories_exp2_RT_by_trial.csv")
d_rt <- read_csv(rt_path)

# Attach trial-level accuracy info. NOTE(review): left_join with no `by`
# joins on ALL columns shared between the two tables (dplyr default) —
# presumably sub_num + trial identifiers; confirm the join keys are as
# intended.
d_rt<- trial_corrected_accuracy %>%
  left_join(d_rt)

Visualization of RTs

# Distribution of shift-start RTs for "D-T" shifts (presumably
# distractor-to-target — confirm against rt_helper.R). Right skew here
# motivates the log transform used in the models that follow.
hist(filter(d_rt, shift_type=="D-T")$shift_start_rt)

The data are right skewed, which is common for RTs. We will use log transformations in the subsequent models to account for the distribution of the data.

Models

4.3.1 Subject-level RT

# Subject-by-condition mean RT for "D-T" shifts only, with 95% t-based CIs.
# N is computed first so the later ci expression can reference it within
# the same summarize() call. The result remains grouped by
# sub_num and child_gender (summarize drops only the last grouping level).
avg_subj_RT <- d_rt %>%
  filter(shift_type=="D-T")%>%
  group_by(sub_num, child_gender,condition) %>%
  summarize(N=n(),
            average_RT=mean(rt,na.rm=TRUE),
            ci=qt(0.975, N-1)*sd(rt,na.rm=T)/sqrt(N),
            lower_ci=average_RT-ci,
            upper_ci=average_RT+ci)
  

# Three equivalent numeric codings of typicality condition, all derived
# from a single typical-indicator (1 = typical, 0 = atypical, NA other):
#   *_c    : centered (-0.5 / +0.5), intercept = grand mean
#   *_typ  : typical = 0, atypical = -1, intercept = typical mean
#   *_atyp : atypical = 0, typical = 1, intercept = atypical mean
avg_subj_RT <- avg_subj_RT %>%
  mutate(
    is_typical = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    ),
    typicality_condition_c = is_typical - 0.5,
    typicality_condition_typ = is_typical - 1,
    typicality_condition_atyp = is_typical
  ) %>%
  select(-is_typical)

# Model 4.1: subject-level log mean RT on centered typicality condition
# (-0.5 = atypical, +0.5 = typical) with a by-subject random intercept
# (two observations per subject, one per condition).
m_4_1 <- lmer(log(average_RT) ~ 1 + typicality_condition_c + (1|sub_num),data=avg_subj_RT)

summary(m_4_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: log(average_RT) ~ 1 + typicality_condition_c + (1 | sub_num)
##    Data: avg_subj_RT
## 
## REML criterion at convergence: 110.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.04996 -0.57633  0.04623  0.57349  1.81816 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  sub_num  (Intercept) 0.04172  0.2043  
##  Residual             0.07332  0.2708  
## Number of obs: 170, groups:  sub_num, 85
## 
## Fixed effects:
##                        Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)             6.63851    0.03037 84.00000 218.617   <2e-16 ***
## typicality_condition_c -0.08136    0.04153 84.00000  -1.959   0.0534 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ 0.000
# Wald 95% CIs for the fixed effects.
confint(m_4_1,method="Wald")
##                             2.5 %       97.5 %
## .sig01                         NA           NA
## .sigma                         NA           NA
## (Intercept)             6.5789981 6.698030e+00
## typicality_condition_c -0.1627698 4.102029e-05

There is no significant typicality effect.

4.3.1.1 RT for typical

# Same model re-parameterized so the intercept estimates mean log RT in
# the TYPICAL condition (typical = 0, atypical = -1); the slope estimate
# is identical to m_4_1.
m_4_1_1 <- lmer(log(average_RT) ~ 1 + typicality_condition_typ + (1|sub_num),data=avg_subj_RT)

summary(m_4_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: log(average_RT) ~ 1 + typicality_condition_typ + (1 | sub_num)
##    Data: avg_subj_RT
## 
## REML criterion at convergence: 110.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.04996 -0.57633  0.04623  0.57349  1.81816 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  sub_num  (Intercept) 0.04172  0.2043  
##  Residual             0.07332  0.2708  
## Number of obs: 170, groups:  sub_num, 85
## 
## Fixed effects:
##                           Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)                6.59783    0.03679 148.47185 179.347   <2e-16 ***
## typicality_condition_typ  -0.08136    0.04153  84.00000  -1.959   0.0534 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ 0.565
# Wald 95% CIs for the fixed effects.
confint(m_4_1_1,method="Wald")
##                               2.5 %       97.5 %
## .sig01                           NA           NA
## .sigma                           NA           NA
## (Intercept)               6.5257287 6.669935e+00
## typicality_condition_typ -0.1627698 4.102029e-05

RT for atypical

# Re-parameterized so the intercept estimates mean log RT in the ATYPICAL
# condition (atypical = 0, typical = 1); slope again identical to m_4_1.
m_4_1_2 <- lmer(log(average_RT) ~ 1 + typicality_condition_atyp + (1|sub_num),data=avg_subj_RT)

summary(m_4_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: log(average_RT) ~ 1 + typicality_condition_atyp + (1 | sub_num)
##    Data: avg_subj_RT
## 
## REML criterion at convergence: 110.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.04996 -0.57633  0.04623  0.57349  1.81816 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  sub_num  (Intercept) 0.04172  0.2043  
##  Residual             0.07332  0.2708  
## Number of obs: 170, groups:  sub_num, 85
## 
## Fixed effects:
##                            Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)                 6.67920    0.03679 148.47185 181.559   <2e-16 ***
## typicality_condition_atyp  -0.08136    0.04153  84.00000  -1.959   0.0534 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ -0.565
# Wald 95% CIs for the fixed effects.
confint(m_4_1_2,method="Wald")
##                                2.5 %       97.5 %
## .sig01                            NA           NA
## .sigma                            NA           NA
## (Intercept)                6.6070931 6.751300e+00
## typicality_condition_atyp -0.1627698 4.102029e-05

4.3.2 Trial level RT

# Trial-level data restricted to "D-T" shifts, with the same three
# typicality codings used at the subject level, derived from a single
# typical-indicator (1 = typical, 0 = atypical, NA otherwise).
trial_rt <- d_rt %>%
  filter(shift_type=="D-T")%>%
  mutate(
    is_typical = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    ),
    typicality_condition_c = is_typical - 0.5,    # centered: -0.5 / +0.5
    typicality_condition_typ = is_typical - 1,    # typical as reference
    typicality_condition_atyp = is_typical        # atypical as reference
  ) %>%
  select(-is_typical)

# Model 4.2: trial-level log RT on centered typicality condition, with
# uncorrelated (||) by-subject intercept + slope and a by-category random
# intercept.
m_4_2 <- lmer(log(rt) ~ 1 + typicality_condition_c + 
            (1+ typicality_condition_c||sub_num) +
            (1|category),
          data=trial_rt)

summary(m_4_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: log(rt) ~ 1 + typicality_condition_c + (1 + typicality_condition_c ||  
##     sub_num) + (1 | category)
##    Data: trial_rt
## 
## REML criterion at convergence: 3364.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7215 -0.6696  0.1085  0.6488  2.3703 
## 
## Random effects:
##  Groups    Name                   Variance  Std.Dev. 
##  sub_num   (Intercept)            2.802e-02 1.674e-01
##  sub_num.1 typicality_condition_c 1.142e-10 1.069e-05
##  category  (Intercept)            2.893e-03 5.379e-02
##  Residual                         6.035e-01 7.769e-01
## Number of obs: 1417, groups:  sub_num, 85; category, 4
## 
## Fixed effects:
##                          Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)               6.38954    0.03867    4.99268 165.214 1.59e-10 ***
## typicality_condition_c   -0.04923    0.04163 1375.82651  -1.183    0.237    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## typclty_cn_ 0.011 
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Wald 95% CIs for the fixed effects.
confint(m_4_2,method="Wald")
##                             2.5 %     97.5 %
## .sig01                         NA         NA
## .sig02                         NA         NA
## .sig03                         NA         NA
## .sigma                         NA         NA
## (Intercept)             6.3137398 6.46533987
## typicality_condition_c -0.1308226 0.03236191

4.4 Do effects interact with test session?

We will fit models analogous to those in 1.1 and 1.2 including an interaction with test session